ASSERT(spin_is_locked(&d->arch.shadow_lock));
SH_VLOG("shadow mode table op %p %p count %d",
- pagetable_val(d->exec_domain[0]->arch.pagetable), /* XXX SMP */
+ pagetable_val(d->exec_domain[0]->arch.guest_table), /* XXX SMP */
pagetable_val(d->exec_domain[0]->arch.shadow_table), /* XXX SMP */
d->arch.shadow_page_count);
if ( !(sl1ss & PSH_shadowed) )
{
/* This L1 is NOT already shadowed so we need to shadow it. */
- SH_VVLOG("4a: l1 not shadowed ( %p )", sl1pfn);
+ SH_VVLOG("4a: l1 not shadowed ( %p )", sl1ss);
sl1mfn_info = alloc_shadow_page(d);
sl1mfn_info->u.inuse.type_info = PGT_l1_page_table;
SH_VVLOG("shadow_fault( va=%p, code=%ld )", va, error_code );
- check_pagetable(d, ed->arch.pagetable, "pre-sf");
+ check_pagetable(d, ed->arch.guest_table, "pre-sf");
/*
* STEP 1. A fast-reject set of checks with no locking.
shadow_unlock(d);
- check_pagetable(d, ed->arch.pagetable, "post-sf");
+ check_pagetable(d, ed->arch.guest_table, "post-sf");
return EXCRET_fault_fixed;
}
unsigned long eip;
unsigned long gpa;
int result;
+ struct exec_domain *ed = current;
#if VMX_DEBUG
{
}
#endif
+ /*
+ * If vpagetable is zero, then we are still emulating 1:1 page tables,
+ * and we should have never gotten here.
+ */
+ if ( !ed->arch.vpagetable )
+ {
+ printk("vmx_do_page_fault while still running on 1:1 page table\n");
+ return 0;
+ }
+
gpa = gva_to_gpa(va);
if (!gpa)
return 0;
break;
}
default:
- __vmx_bug(&regs);
+ printk("unexpected VMexit for exception vector 0x%x\n", vector);
+ //__vmx_bug(&regs);
break;
}
break;
void vmx_do_resume(struct exec_domain *d)
{
+ if ( d->arch.vpagetable )
+ __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
+ else
+ // we haven't switched off the 1:1 pagetable yet...
+ __vmwrite(GUEST_CR3, pagetable_val(d->arch.guest_table));
+
__vmwrite(HOST_CR3, pagetable_val(d->arch.monitor_table));
- __vmwrite(GUEST_CR3, pagetable_val(d->arch.shadow_table));
__vmwrite(HOST_ESP, (unsigned long)get_stack_bottom());
if (event_pending(d)) {